Use size-independent '%p' format string in preference to '%08lx', so that
printed values are full width on both 32-bit and 64-bit builds. To keep the
varargs widths correct, the 32-bit e*x register aliases in the x86_64
register frame are widened to u64 (their values are passed to '%p', e.g.
regs.eax/regs.edx in the MSR_WRITE log); a disabled '#if 0' debug printk in
the page-fault path is also removed.
Signed-off-by: keir.fraser@cl.cam.ac.uk
{
unsigned long fixup = search_one_table(
__start___pre_ex_table, __stop___pre_ex_table-1, addr);
- DPRINTK("Pre-exception: %08lx -> %08lx\n", addr, fixup);
+ DPRINTK("Pre-exception: %p -> %p\n", addr, fixup);
return fixup;
}
if ( unlikely(!pfn_is_ram(page_nr)) )
{
- MEM_LOG("Pfn %08lx is not RAM", page_nr);
+ MEM_LOG("Pfn %p is not RAM", page_nr);
return 0;
}
if ( unlikely(!get_page(page, d)) )
{
- MEM_LOG("Could not get page ref for pfn %08lx", page_nr);
+ MEM_LOG("Could not get page ref for pfn %p", page_nr);
return 0;
}
if ( unlikely(!get_page_type(page, type)) )
{
-#ifdef VERBOSE
if ( (type & PGT_type_mask) != PGT_l1_page_table )
- MEM_LOG("Bad page type for pfn %08lx (%08x)",
+ MEM_LOG("Bad page type for pfn %p (%08x)",
page_nr, page->u.inuse.type_info);
-#endif
put_page(page);
return 0;
}
if ( IS_CAPABLE_PHYSDEV(d) )
return domain_iomem_in_pfn(d, pfn);
- MEM_LOG("Non-privileged attempt to map I/O space %08lx", pfn);
+ MEM_LOG("Non-privileged attempt to map I/O space %p", pfn);
return 0;
}
l2_pgentry_val(ol2e),
l2_pgentry_val(nl2e));
if ( o != l2_pgentry_val(ol2e) )
- MEM_LOG("Failed to update %08lx -> %08lx: saw %08lx\n",
+ MEM_LOG("Failed to update %p -> %p: saw %p\n",
l2_pgentry_val(ol2e), l2_pgentry_val(nl2e), o);
return (o == l2_pgentry_val(ol2e));
}
if ( unlikely(cmpxchg_user(pl1e, o, n) != 0) ||
unlikely(o != l1_pgentry_val(ol1e)) )
{
- MEM_LOG("Failed to update %08lx -> %08lx: saw %08lx\n",
+ MEM_LOG("Failed to update %p -> %p: saw %p\n",
l1_pgentry_val(ol1e), l1_pgentry_val(nl1e), o);
return 0;
}
nx = x + 1;
if ( unlikely((nx & PGT_count_mask) == 0) )
{
- MEM_LOG("Type count overflow on pfn %08lx\n", page_to_pfn(page));
+ MEM_LOG("Type count overflow on pfn %p\n", page_to_pfn(page));
return 0;
}
else if ( unlikely((x & PGT_count_mask) == 0) )
{
if ( ((x & PGT_type_mask) != PGT_l2_page_table) ||
((type & PGT_type_mask) != PGT_l1_page_table) )
- MEM_LOG("Bad type (saw %08x != exp %08x) for pfn %08lx\n",
+ MEM_LOG("Bad type (saw %08x != exp %08x) for pfn %p\n",
x & PGT_type_mask, type, page_to_pfn(page));
return 0;
}
/* Try to validate page type; drop the new reference on failure. */
if ( unlikely(!alloc_page_type(page, type & PGT_type_mask)) )
{
- MEM_LOG("Error while validating pfn %08lx for type %08x."
+ MEM_LOG("Error while validating pfn %p for type %08x."
" caf=%08x taf=%08x\n",
page_to_pfn(page), type,
page->count_info,
}
else
{
- MEM_LOG("Error while installing new baseptr %08lx", pfn);
+ MEM_LOG("Error while installing new baseptr %p", pfn);
}
return okay;
if ( unlikely(!okay) )
{
- MEM_LOG("Error while pinning pfn %08lx", pfn);
+ MEM_LOG("Error while pinning pfn %p", pfn);
break;
}
if ( unlikely(test_and_set_bit(_PGT_pinned,
&page->u.inuse.type_info)) )
{
- MEM_LOG("Pfn %08lx already pinned", pfn);
+ MEM_LOG("Pfn %p already pinned", pfn);
put_page_and_type(page);
okay = 0;
break;
case MMUEXT_UNPIN_TABLE:
if ( unlikely(!(okay = get_page_from_pagenr(pfn, FOREIGNDOM))) )
{
- MEM_LOG("Page %08lx bad domain (dom=%p)",
+ MEM_LOG("Page %p bad domain (dom=%p)",
ptr, page_get_owner(page));
}
else if ( likely(test_and_clear_bit(_PGT_pinned,
{
okay = 0;
put_page(page);
- MEM_LOG("Pfn %08lx not pinned", pfn);
+ MEM_LOG("Pfn %p not pinned", pfn);
}
break;
((ptr+ents*LDT_ENTRY_SIZE) > PAGE_OFFSET) )
{
okay = 0;
- MEM_LOG("Bad args to SET_LDT: ptr=%08lx, ents=%08lx", ptr, ents);
+ MEM_LOG("Bad args to SET_LDT: ptr=%p, ents=%p", ptr, ents);
}
else if ( (ed->arch.ldt_ents != ents) ||
(ed->arch.ldt_base != ptr) )
unlikely(!pfn_is_ram(pfn)) ||
unlikely((e = find_domain_by_id(domid)) == NULL) )
{
- MEM_LOG("Bad frame (%08lx) or bad domid (%d).\n", pfn, domid);
+ MEM_LOG("Bad frame (%p) or bad domid (%d).\n", pfn, domid);
okay = 0;
break;
}
(1|PGC_allocated)) ||
unlikely(nd != d) )
{
- MEM_LOG("Bad page values %08lx: ed=%p(%u), sd=%p,"
+ MEM_LOG("Bad page values %p: ed=%p(%u), sd=%p,"
" caf=%08x, taf=%08x\n", page_to_pfn(page),
d, d->id, nd, x, page->u.inuse.type_info);
spin_unlock(&d->page_alloc_lock);
unlikely(!gnttab_prepare_for_transfer(e, d, gntref)) )
{
MEM_LOG("Transferee has no reservation headroom (%d,%d), or "
- "provided a bad grant ref, or is dying (%08lx).\n",
+ "provided a bad grant ref, or is dying (%p).\n",
e->tot_pages, e->max_pages, e->d_flags);
spin_unlock(&e->page_alloc_lock);
put_domain(e);
e = percpu_info[cpu].foreign;
if ( unlikely(e == NULL) )
{
- MEM_LOG("No FOREIGNDOM to reassign pfn %08lx to", pfn);
+ MEM_LOG("No FOREIGNDOM to reassign pfn %p to", pfn);
okay = 0;
break;
}
(1|PGC_allocated)) ||
unlikely(nd != d) )
{
- MEM_LOG("Bad page values %08lx: ed=%p(%u), sd=%p,"
+ MEM_LOG("Bad page values %p: ed=%p(%u), sd=%p,"
" caf=%08x, taf=%08x\n", page_to_pfn(page),
d, d->id, nd, x, page->u.inuse.type_info);
okay = 0;
break;
default:
- MEM_LOG("Invalid extended pt command 0x%08lx", val & MMUEXT_CMD_MASK);
+ MEM_LOG("Invalid extended pt command 0x%p", val & MMUEXT_CMD_MASK);
okay = 0;
break;
}
break;
default:
- MEM_LOG("Invalid page update command %08lx", req.ptr);
+ MEM_LOG("Invalid page update command %p", req.ptr);
break;
}
*/
BUG();
}
- PTWR_PRINTK("[%c] disconnected_l1va at %p is %08lx\n",
+ PTWR_PRINTK("[%c] disconnected_l1va at %p is %p\n",
PTWR_PRINT_WHICH, ptep, pte);
pte &= ~_PAGE_RW;
#else
flush_tlb_all();
#endif
- PTWR_PRINTK("[%c] disconnected_l1va at %p now %08lx\n",
+ PTWR_PRINTK("[%c] disconnected_l1va at %p now %p\n",
PTWR_PRINT_WHICH, ptep, pte);
/*
if ( l2_idx == (addr >> L2_PAGETABLE_SHIFT) )
{
- MEM_LOG("PTWR failure! Pagetable maps itself at %08lx\n", addr);
+ MEM_LOG("PTWR failure! Pagetable maps itself at %p\n", addr);
domain_crash();
}
}
}
- PTWR_PRINTK("[%c] page_fault on l1 pt at va %08lx, pt for %08x, "
- "pfn %08lx\n", PTWR_PRINT_WHICH,
+ PTWR_PRINTK("[%c] page_fault on l1 pt at va %p, pt for %08x, "
+ "pfn %p\n", PTWR_PRINT_WHICH,
addr, l2_idx << L2_PAGETABLE_SHIFT, pfn);
/*
/* Finally, make the p.t. page writable by the guest OS. */
pte |= _PAGE_RW;
- PTWR_PRINTK("[%c] update %p pte to %08lx\n", PTWR_PRINT_WHICH,
+ PTWR_PRINTK("[%c] update %p pte to %p\n", PTWR_PRINT_WHICH,
&linear_pg_table[addr>>PAGE_SHIFT], pte);
if ( unlikely(__put_user(pte, (unsigned long *)
&linear_pg_table[addr>>PAGE_SHIFT])) )
page = &frame_table[pfn];
printk("need to alloc l1 page %p\n", page);
/* make pt page writable */
- printk("need to make read-only l1-page at %p is %08lx\n",
+ printk("need to make read-only l1-page at %p is %p\n",
ptep, pte);
if ( ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l1va == 0 )
ASSERT(spin_is_locked(&d->arch.shadow_lock));
- SH_VLOG("shadow mode table op %08lx %08lx count %d",
+ SH_VLOG("shadow mode table op %p %p count %d",
pagetable_val(d->exec_domain[0]->arch.pagetable), /* XXX SMP */
pagetable_val(d->exec_domain[0]->arch.shadow_table), /* XXX SMP */
d->arch.shadow_page_count);
unsigned long smfn;
struct domain *d = page_get_owner(&frame_table[gpfn]);
- SH_VLOG("unshadow_table type=%08x gpfn=%08lx", type, gpfn);
+ SH_VLOG("unshadow_table type=%08x gpfn=%p", type, gpfn);
perfc_incrc(unshadow_table_count);
guest_gpfn = __mfn_to_gpfn(d, gpfn);
- SH_VVLOG("shadow_l2_table( %08lx )", gpfn);
+ SH_VVLOG("shadow_l2_table( %p )", gpfn);
perfc_incrc(shadow_l2_table_count);
if ( shadow_mode(d) != SHM_full_32 )
unmap_domain_mem(spl2e);
- SH_VLOG("shadow_l2_table( %08lx -> %08lx)", gpfn, spfn);
+ SH_VLOG("shadow_l2_table( %p -> %p)", gpfn, spfn);
return spfn;
}
if ( !(sl1ss & PSH_shadowed) )
{
/* This L1 is NOT already shadowed so we need to shadow it. */
- SH_VVLOG("4a: l1 not shadowed ( %08lx )", sl1pfn);
+ SH_VVLOG("4a: l1 not shadowed ( %p )", sl1pfn);
sl1pfn_info = alloc_shadow_page(d);
sl1pfn_info->u.inuse.type_info = PGT_l1_page_table;
else
{
/* This L1 is shadowed already, but the L2 entry is missing. */
- SH_VVLOG("4b: was shadowed, l2 missing ( %08lx )", sl1pfn);
+ SH_VVLOG("4b: was shadowed, l2 missing ( %p )", sl1pfn);
sl1pfn = sl1ss & PSH_pfn_mask;
l2pde_general(d, &gl2e, &sl2e, sl1pfn);
struct exec_domain *ed = current;
struct domain *d = ed->domain;
- SH_VVLOG("shadow_fault( va=%08lx, code=%ld )", va, error_code );
+ SH_VVLOG("shadow_fault( va=%p, code=%ld )", va, error_code );
check_pagetable(d, ed->arch.pagetable, "pre-sf");
if ( unlikely(__put_user(spte, (unsigned long *)
&shadow_linear_pg_table[va >> PAGE_SHIFT])) )
{
- SH_VVLOG("3: not shadowed/mapped gpte=%08lx spte=%08lx", gpte, spte);
+ SH_VVLOG("3: not shadowed/mapped gpte=%p spte=%p", gpte, spte);
shadow_map_l1_into_current_l2(va);
shadow_linear_pg_table[va >> PAGE_SHIFT] = mk_l1_pgentry(spte);
}
l1_pgentry_t *spl1e, *prev_spl1e = *prev_spl1e_ptr;
/* N.B. To get here, we know the l1 page *must* be shadowed. */
- SH_VVLOG("shadow_l1_normal_pt_update pa=%08lx, gpte=%08lx, "
- "prev_smfn=%08lx, prev_spl1e=%p",
+ SH_VVLOG("shadow_l1_normal_pt_update pa=%p, gpte=%p, "
+ "prev_smfn=%p, prev_spl1e=%p",
pa, gpte, prev_smfn, prev_spl1e);
smfn = __shadow_status(current->domain, pa >> PAGE_SHIFT) & PSH_pfn_mask;
unsigned long sl1mfn;
/* N.B. To get here, we know the l2 page *must* be shadowed. */
- SH_VVLOG("shadow_l2_normal_pt_update pa=%08lx, gpde=%08lx",pa,gpde);
+ SH_VVLOG("shadow_l2_normal_pt_update pa=%p, gpde=%p",pa,gpde);
sl2mfn = __shadow_status(current->domain, pa >> PAGE_SHIFT) & PSH_pfn_mask;
#define FAIL(_f, _a...) \
do { \
- printk("XXX %s-FAIL (%d,%d)" _f " g=%08lx s=%08lx &g=%08lx &s=%08lx" \
- " pa(&g)=%08lx pa(&s)=%08lx\n", \
+ printk("XXX %s-FAIL (%d,%d)" _f " g=%p s=%p &g=%p &s=%p" \
+ " pa(&g)=%p pa(&s)=%p\n", \
sh_check_name, level, i, ## _a , gpte, spte, pgpte, pspte, \
virt_to_phys2(pgpte), virt_to_phys2(pspte)); \
errors++; \
FAIL("Shadow in L1 entry?");
if ( __shadow_status(d, gpfn) != (PSH_shadowed | smfn) )
- FAIL("smfn problem g.sf=%08lx",
+ FAIL("smfn problem g.sf=%p",
__shadow_status(d, gpfn) );
}
if ( ! (ss & PSH_shadowed) )
{
- printk("%s-PT %08lx not shadowed\n", s, gptbase);
+ printk("%s-PT %p not shadowed\n", s, gptbase);
if ( ss != 0 )
BUG();
for ( i = DOMAIN_ENTRIES_PER_L2_PAGETABLE;
i < (SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT);
i++ )
- printk("+++ (%d) %08lx %08lx\n",i,
+ printk("+++ (%d) %p %p\n",i,
l2_pgentry_val(gpl2e[i]), l2_pgentry_val(spl2e[i]));
FAILPT("hypervisor entries inconsistent");
}
if ( (l2_pgentry_val(spl2e[SH_LINEAR_PT_VIRT_START >>
L2_PAGETABLE_SHIFT]) !=
((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR)) )
- FAILPT("hypervisor shadow linear map inconsistent %08lx %08lx",
+ FAILPT("hypervisor shadow linear map inconsistent %p %p",
l2_pgentry_val(spl2e[SH_LINEAR_PT_VIRT_START >>
L2_PAGETABLE_SHIFT]),
(smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
perfc_incrc(page_faults);
-#if 0
- printk("do_page_fault(addr=0x%08lx, error_code=%d)\n",
- addr, regs->error_code);
- show_registers(regs);
-#endif
-
if ( likely(VM_ASSIST(d, VMASST_TYPE_writable_pagetables)) )
{
LOCK_BIGLOCK(d);
__vmread(GUEST_EIP, &eip);
- VMX_DBG_LOG(DBG_LEVEL_VMMU, "vmx_vmexit_do_invlpg:eip=%08lx, va=%08lx\n",
+ VMX_DBG_LOG(DBG_LEVEL_VMMU, "vmx_vmexit_do_invlpg:eip=%p, va=%p\n",
eip, va);
/*
__vmread(GUEST_EIP, &eip);
VMX_DBG_LOG(DBG_LEVEL_1,
- "vmx_io_instruction: eip=%08lx, exit_qualification = %lx\n",
+ "vmx_io_instruction: eip=%p, exit_qualification = %lx\n",
eip, exit_qualification);
if (test_bit(6, &exit_qualification))
unsigned long eip;
__vmread(GUEST_EIP, &eip);
#endif
- VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_hlt:eip=%08lx\n", eip);
+ VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_hlt:eip=%p\n", eip);
__enter_scheduler();
}
unsigned long eip;
__vmread(GUEST_EIP, &eip);
#endif
- VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_mwait:eip=%08lx\n", eip);
+ VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_mwait:eip=%p\n", eip);
__enter_scheduler();
}
break;
case EXIT_REASON_MSR_WRITE:
__vmread(GUEST_EIP, &eip);
- VMX_DBG_LOG(DBG_LEVEL_1, "MSR_WRITE: eip=%08lx, eax=%08lx, edx=%08lx",
+ VMX_DBG_LOG(DBG_LEVEL_1, "MSR_WRITE: eip=%p, eax=%p, edx=%p",
eip, regs.eax, regs.edx);
/* just ignore this point */
__get_instruction_length(inst_len);
{
if ( unlikely((mpfn + j) >= max_page) )
{
- DPRINTK("Domain %u page number out of range (%08lx>=%08lx)\n",
+ DPRINTK("Domain %u page number out of range (%p>=%p)\n",
d->id, mpfn + j, max_page);
return i;
}
if ( (bad_pfn < (bitmap_size*8)) && !allocated_in_map(bad_pfn) )
{
- printk("Marking page %08lx as bad\n", bad_pfn);
+ printk("Marking page %p as bad\n", bad_pfn);
map_alloc(bad_pfn, 1);
}
}
phys_dev_t *phys_dev;
VERBOSE_INFO("Checking if physdev-capable domain %u needs access to "
- "pfn %08lx\n", p->id, pfn);
+ "pfn %p\n", p->id, pfn);
spin_lock(&p->pcidev_lock);
spin_unlock(&p->pcidev_lock);
- VERBOSE_INFO("Domain %u %s mapping of pfn %08lx\n",
+ VERBOSE_INFO("Domain %u %s mapping of pfn %p\n",
p->id, ret ? "allowed" : "disallowed", pfn);
return ret;
#define IS_XEN_HEAP_FRAME(_pfn) (page_to_phys(_pfn) < xenheap_phys_end)
#if defined(__i386__)
-
#define pickle_domptr(_d) ((u32)(unsigned long)(_d))
#define unpickle_domptr(_d) ((struct domain *)(unsigned long)(_d))
-
#elif defined(__x86_64__)
static inline struct domain *unpickle_domptr(u32 _domain)
{ return (_domain == 0) ? NULL : __va(_domain); }
static inline u32 pickle_domptr(struct domain *domain)
{ return (domain == NULL) ? 0 : (u32)__pa(domain); }
-
#endif
#define page_get_owner(_p) (unpickle_domptr((_p)->u.inuse._domain))
unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */
unlikely(d != _domain) ) /* Wrong owner? */
{
- DPRINTK("Error pfn %08lx: ed=%p, sd=%p, caf=%08x, taf=%08x\n",
+ DPRINTK("Error pfn %p: ed=%p, sd=%p, caf=%08x, taf=%08x\n",
page_to_pfn(page), domain, unpickle_domptr(d),
x, page->u.inuse.type_info);
return 0;
spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
- SH_VVLOG("l1pte_write_fault: updating spte=0x%08lx gpte=0x%08lx", spte, gpte);
+ SH_VVLOG("l1pte_write_fault: updating spte=0x%p gpte=0x%p", spte, gpte);
*gpte_p = gpte;
*spte_p = spte;
}
if ( (shadow_mode(d) == SHM_logdirty) || ! (gpte & _PAGE_DIRTY) )
spte &= ~_PAGE_RW;
- SH_VVLOG("l1pte_read_fault: updating spte=0x%08lx gpte=0x%08lx", spte, gpte);
+ SH_VVLOG("l1pte_read_fault: updating spte=0x%p gpte=0x%p", spte, gpte);
*gpte_p = gpte;
*spte_p = spte;
}
#if SHADOW_VERBOSE_DEBUG
if ( old_spte || spte || gpte )
- SH_VVLOG("l1pte_propagate_from_guest: gpte=0x%08lx, old spte=0x%08lx, new spte=0x%08lx ", gpte, old_spte, spte);
+ SH_VVLOG("l1pte_propagate_from_guest: gpte=0x%p, old spte=0x%p, new spte=0x%p ", gpte, old_spte, spte);
#endif
*gpte_p = gpte;
live++;
if ( (a->pfn == 0) || (a->smfn_and_flags == 0) )
{
- printk("XXX live=%d pfn=%08lx sp=%08lx next=%p\n",
+ printk("XXX live=%d pfn=%p sp=%p next=%p\n",
live, a->pfn, a->smfn_and_flags, a->next);
BUG();
}
SWAP(head->smfn_and_flags, x->smfn_and_flags);
}
- SH_VVLOG("lookup gpfn=%08lx => status=%08lx",
+ SH_VVLOG("lookup gpfn=%p => status=%p",
gpfn, head->smfn_and_flags);
return head->smfn_and_flags;
}
}
while ( x != NULL );
- SH_VVLOG("lookup gpfn=%08lx => status=0", gpfn);
+ SH_VVLOG("lookup gpfn=%p => status=0", gpfn);
return 0;
}
x = head = hash_bucket(d, gpfn);
- SH_VVLOG("set gpfn=%08x s=%08lx bucket=%p(%p)", gpfn, s, x, x->next);
+ SH_VVLOG("set gpfn=%p s=%p bucket=%p(%p)", gpfn, s, x, x->next);
shadow_audit(d, 0);
/*
unsigned long gpfn = pagetable_val(ed->arch.pagetable) >> PAGE_SHIFT;
unsigned long smfn = __shadow_status(d, gpfn) & PSH_pfn_mask;
- SH_VVLOG("0: __shadow_mk_pagetable(gpfn=%08lx, smfn=%08lx)", gpfn, smfn);
+ SH_VVLOG("0: __shadow_mk_pagetable(gpfn=%p, smfn=%p)", gpfn, smfn);
if ( unlikely(smfn == 0) )
smfn = shadow_l2_table(d, gpfn);
{
if ( unlikely(shadow_mode(ed->domain)) )
{
- SH_VVLOG("shadow_mk_pagetable( gptbase=%08lx, mode=%d )",
+ SH_VVLOG("shadow_mk_pagetable( gptbase=%p, mode=%d )",
pagetable_val(ed->arch.pagetable),
shadow_mode(ed->domain));
shadow_unlock(ed->domain);
SH_VVLOG("leaving shadow_mk_pagetable:\n"
- "( gptbase=%08lx, mode=%d ) sh=%08lx",
+ "( gptbase=%p, mode=%d ) sh=%p",
pagetable_val(ed->arch.pagetable),
shadow_mode(ed->domain),
pagetable_val(ed->arch.shadow_table) );
u64 r14;
u64 r13;
u64 r12;
- union { u64 rbp; u32 ebp; } __attribute__ ((packed));
- union { u64 rbx; u32 ebx; } __attribute__ ((packed));
+ union { u64 rbp; u64 ebp; } __attribute__ ((packed));
+ union { u64 rbx; u64 ebx; } __attribute__ ((packed));
/* NB. Above here is C callee-saves. */
u64 r11;
u64 r10;
u64 r9;
u64 r8;
- union { u64 rax; u32 eax; } __attribute__ ((packed));
- union { u64 rcx; u32 ecx; } __attribute__ ((packed));
- union { u64 rdx; u32 edx; } __attribute__ ((packed));
- union { u64 rsi; u32 esi; } __attribute__ ((packed));
- union { u64 rdi; u32 edi; } __attribute__ ((packed));
+ union { u64 rax; u64 eax; } __attribute__ ((packed));
+ union { u64 rcx; u64 ecx; } __attribute__ ((packed));
+ union { u64 rdx; u64 edx; } __attribute__ ((packed));
+ union { u64 rsi; u64 esi; } __attribute__ ((packed));
+ union { u64 rdi; u64 edi; } __attribute__ ((packed));
u32 error_code;
u32 entry_vector;
union { u64 rip; u64 eip; } __attribute__ ((packed));